The overarching goal of this project is to determine the degree to which several message framing interventions might enhance message effectiveness and intentions, norms, and beliefs related to social distancing. Specifically, here we test the effect of humorous framing of health messages promoting social distancing behavior. We use two types of humorous framings:

  • mocking frame: humorously mocking or ridiculing individuals who might choose to not follow social distancing even though their circumstances might clearly allow them to socially distance themselves.
  • encouraging frame: humorous framing of the health message which does not overtly mock any individual or group.

Participants were randomly assigned to either a message framing intervention group (using encouraging or mocking humor), a control message group, or a group that saw no messages. Each participant in the intervention and message control groups saw a series of 5 messages about social distancing related to COVID-19, randomly sampled from a pool of 15 messages for pilot 2, which were previously normed for argument strength (M = 4.16, SD = 0.14, possible range = 1-5). Each message was created to look like an Instagram post that included a visual message about COVID-19 accompanied by a “post” about the message. The message control condition contained this stem only, whereas the humorous-framing conditions contained additional text framing the messages humorously (i.e., adding a joke, using the stem as premise). Participants then completed various outcome and individual differences measures.

Pretest

We ran two short pre-tests to test our intended manipulations of funniness and mockingness in the non-mocking and mocking humorous texts we designed. For non-mocking messages, we recruited 32 participants, and for mocking messages, we recruited 37 participants. Participants were asked to rate how funny (1 = Not at all funny, 5 = Extremely funny) and mocking (1 = Not at all mocking, 5 = Extremely mocking) these humorous texts were.

# Load the pretest 1 (encouraging / non-mocking humor) Qualtrics export.
df <- read.csv('pretest1/COVID-19 humorous messages testing_March 26, 2020_20.08.csv')

### Extract variable name data
# Qualtrics stores question wording/metadata in the first two rows; stash
# them for later and keep only the actual response rows.
varnames <- df[1:2, ]
df <- df[seq(3, dim(df)[1]), ]

### Filter based on progress
# Keep respondents who completed at least 90% of the survey.
df$Progress <- as.numeric(as.character(df$Progress))
df <- df[df$Progress >= 90, ]

### Recoding values
# Map the verbal funniness/mockingness response labels onto a 1-5 scale.
# Columns 11:198 are the rating items in this export.
recoding_cols <- colnames(df)[11:198]
for (col in recoding_cols) {
  df[, col] <- recode(df[, col],
                      `Not at all funny` = 1, `A bit funny` = 2,
                      `Somewhat funny` = 3, `Very funny` = 4,
                      `Extremely funny` = 5,
                      `Not at all mocking` = 1, `A bit mocking` = 2,
                      `Somewhat mocking` = 3, `Very mocking` = 4,
                      `Extremely mocking` = 5)
}

### Extract encouraging (non-mocking) messages
# Drop the mocking-message columns, then keep only participants who answered
# all six attention-check items correctly.
df_enc <- df %>%
  dplyr::select(!matches("^moc_")) %>%
  mutate(attention_check = enc_1.1_11 == 3 & enc_1.2_11 == 1 &
                           enc_3.1_11 == 5 & enc_3.2_11 == 2 &
                           enc_5.1_10 == 1 & enc_5.2_10 == 3) %>%
  filter(attention_check) %>%
  # the attention-check items themselves are no longer needed
  dplyr::select(-c(enc_1.1_11, enc_1.2_11, enc_3.1_11, enc_3.2_11,
                   enc_5.1_10, enc_5.2_10))

### convert from wide to long form
# Reshape the encouraging-message ratings so each row is one
# (participant, item) pair with separate funniness and mocking columns.
df_enc <- df_enc %>%
  dplyr::select(matches("enc_")) %>% # keep only the rating columns
  mutate(SID = sprintf("S%02d", seq.int(nrow(.)))) %>% # anonymous subject IDs S01, S02, ...
  gather("item", "score", -SID) %>% # wide -> long: one row per rating
  # split the column name into survey block, question type, and message
  # number; the "." after the block group matches any single character
  extract(item, c("block", "question_type", "msg_number"), "enc_([1-5]).([1-2])_([0-9]+)", remove = TRUE) %>%
  mutate(item = sprintf("enc_%s_%s", block, msg_number)) %>% # rebuild an item id without the question-type digit
  mutate(question_type  = ifelse(question_type == 1, "funniness",
                                 ifelse(question_type == 2, "mocking", "other"))) %>%
  dplyr::select(SID, item, question_type, score) %>%
  spread(question_type, score) # back to wide: one funniness + one mocking column per item

### load item texts and merge with data frame
# Stimulus metadata: item id, joke text, and the index of the paired image.
item_texts <- read.csv('pretest1/encouraging_stim.csv') %>%
  dplyr::select(item, text, image)

# Attach the text/image info to every rating row.
df_enc <- left_join(df_enc, item_texts, by = "item")

############################################
# Pretest 2: load the mocking-humor Qualtrics export and format it
df <- read.csv('pretest2/COVID-19 humorous messages testing (mocking only)_March 31, 2020_18.26.csv')

### Extract variable name data
# First two rows hold Qualtrics question wording; keep response rows only.
varnames <- df[1:2, ]
df <- df[seq(3, dim(df)[1]), ]

### Filter based on progress
# Keep respondents with at least 90% survey completion.
df$Progress <- as.numeric(as.character(df$Progress))
df <- df[df$Progress >= 90, ]

### Recoding values
# Map verbal response labels onto a 1-5 numeric scale.
# Columns 11:64 are the rating items in this export.
recoding_cols <- colnames(df)[11:64]
for (col in recoding_cols) {
  df[, col] <- recode(df[, col],
                      `Not at all funny` = 1, `A bit funny` = 2,
                      `Somewhat funny` = 3, `Very funny` = 4,
                      `Extremely funny` = 5,
                      `Not at all mocking` = 1, `A bit mocking` = 2,
                      `Somewhat mocking` = 3, `Very mocking` = 4,
                      `Extremely mocking` = 5)
}

### Removing unwanted columns
# Keep only the response id plus the rating columns.
df <- df[, c("ResponseId", recoding_cols)]

### Filtering based on attention checks
# A participant passes only if all four attention-check items were answered
# with the expected values.
df$attention_check <- with(df, att_check_funniness == 4 & att_check_mocking == 3 &
                               att_check_funniness.1 == 1 & att_check_mocking.1 == 4)

df <- df[df$attention_check, ]

### Removing attention check variables
df <- df[, setdiff(names(df),
                   c("att_check_funniness", "att_check_funniness.1",
                     "att_check_mocking", "att_check_mocking.1",
                     "attention_check"))]

### convert from wide to long form
# Reshape so each row is one (participant, item) pair with one column per
# rating type (funniness, mocking).
list_items <- colnames(df)[2:51]
df <- df %>%
  gather(key = "item", value = "value", list_items) %>% # NOTE(review): `list_items` is an external character vector; newer tidyselect versions warn here and prefer all_of(list_items)
  separate(item, c("image", "joke_number", "rating_type")) %>% # default sep splits on non-alphanumerics; assumes names look like <image>_<joke>_<rating> — confirm against the export
  unite("item", c("image", "joke_number")) %>% # item id = image + joke number
  spread(rating_type, value) # one column per rating type

### extracting list of joke texts
# The first metadata row holds the full question wording for each column;
# recover the joke text and item id from it.
varnames <- varnames[1,list_items]
varnames <- varnames %>%
  gather(key = "item", value = "text") %>% # one row per question column
  separate(item, c("image", "joke_number", "rating_type")) %>% # same name scheme as the data columns
  unite("item", c("image", "joke_number"))
# keep one row per item by restricting to the funniness questions
# (each item also has a mocking question with the same joke text)
varnames  <- varnames[varnames$rating_type == "funniness",]
varnames <- varnames %>%
  # the joke text follows this fixed boilerplate in the question wording;
  # everything before the separator is discarded below
  separate(text, c("remove_this", "joke_text"), sep = "  - Please indicate how funny you find the following messages. - ") %>%
  # strip the "X" prefix that read.csv added to numeric column names
  separate(item, c("remove_this2", "item"), sep = "X")
varnames$remove_this <- NULL
varnames$remove_this2 <- NULL
varnames$rating_type <- NULL

### merge joke texts with the data
# read.csv prefixed numeric column names with "X"; strip that prefix so the
# item ids line up with those in `varnames`.
df <- df %>%
  separate(item, c("x_prefix", "item"), sep = "X") %>%
  dplyr::select(-x_prefix)
# inner merge: keeps only items present in both the ratings and text tables
df_mock <- merge(df, varnames, by = "item")

rm(df)

List of humorous texts and ratings

NOTE: In the pilots and subsequent studies, the stimuli are images paired with texts in the format of an Instagram post. We work with an initial set of 33 images, and each pretested humorous text corresponds to one of these images. The image indices corresponding to the texts are mentioned in these tables.

Non-mocking messages

Jokes in decreasing order of funniness

Best jokes per image

For each image number, we selected the joke text with the highest funniness rating.

Mocking messages

Jokes in decreasing order of funniness

Best jokes per image

For each image number, we selected the joke text with the highest funniness rating.

Selection of messages for pilot studies

For Pilot 1, we selected texts which had a funniness rating of more than 2 (which corresponded to “a bit funny”), and for each image, we selected the corresponding humorous text (only non-mocking) with the highest funniness score. For Pilot 2, we selected humorous texts (both mocking and non-mocking) which were about social distancing and had a score greater than 1.9 (making the threshold of 2 a bit lenient to accommodate more stimuli), resulting in 12 non-mocking messages and 13 mocking messages.

Pilot 1

Here, we test the effect of “encouraging” humor versus message control, which were non-humorous. Encouraging humor refers to humorous framing of health messages such that these messages were not overtly mocking any person or group of people.

## tidy data for analysis

data = data_pilot1

# Message-level ratings: keep the two pilot-1 conditions and the
# message-rating surveys, then compute per-message favorability
# (positive minus negative).
messages = data %>%
  filter(condition %in% c("message control", "encouraging")) %>%
  filter(grepl("msg", survey_name)) %>%
  mutate(value = as.numeric(value),
         # reverse-code need-for-cognition items (1-5 scale -> abs(6 - x));
         # NOTE(review): cognition items are unlikely to survive the "msg"
         # filter above, so these three lines may be inert here — confirm
         value = ifelse(item == "cognition_2", abs(6 - value), value),
         value = ifelse(item == "cognition_4", abs(6 - value), value),
         value = ifelse(item == "cognition_6", abs(6 - value), value)) %>% 
  extract(item, "item", "msg_.*_(.*)") %>% # keep just the message-number suffix of the item name
  spread(survey_name, value) %>% # one column per rating type
  mutate(msg_favorability = msg_positive - msg_negative) %>%
  dplyr::select(-msg_negative, -msg_positive) %>%
  gather(survey_name, value, contains("msg")) %>% # back to long form
  mutate(item = sprintf("%s_%s", survey_name, item))

# Person-level survey items (cognition, intentions, norms, beliefs,
# politics, demographics) combined with the message ratings above.
data_tidy = data %>%
  filter(condition %in% c("message control", "encouraging")) %>%
  mutate(condition = str_replace(condition, "-paired|-unpaired", "")) %>%
  filter(grepl("cognition|intentions|norms_close|norms_town|beliefs|beliefs|politics_party|politics_conserv|^age$|gender", survey_name)) %>% # NOTE(review): "beliefs" appears twice in this regex (harmless redundancy)
  mutate(value = as.numeric(value)) %>% 
  bind_rows(messages)

# Demographic / control covariates, one row per participant.
control_vars = data %>%
  filter(grepl("state|gender|^age$", survey_name)) %>%
  dplyr::select(condition, SID, survey_name, value) %>%
  unique() %>%
  spread(survey_name, value) %>%
  mutate(state = as.factor(state),
         gender = recode(gender, "1" = "male", "2" = "female", "3" = "other", "4" = "prefer not to say"),
         age = scale(as.numeric(age), center = TRUE, scale = FALSE)) # mean-center age, keep original scale

# Person-level means per survey (selected items only).
data_person = data_tidy %>%
  filter(grepl("msg|cognition|beliefs|intentions1_2|intentions1_4|intentions1_6|intentions1_10|norms_close1_2|norms_close1_4|norms_close1_6|norms_close1_10|norms_town1_2|norms_town1_4|norms_town1_6|norms_town1_10|politics_party|politics_conserv", item)) %>%
  group_by(condition, SID, survey_name) %>%
  summarize(value = mean(value, na.rm = TRUE)) # result remains grouped by condition and SID

number of subjects per condition

plot_cond = function(data, survey, item=TRUE, palette=NULL) {
  # Plot bootstrapped means with 95% CIs by condition for all surveys whose
  # name matches the regex `survey`.
  #
  # data:    long-form data frame with survey_name, item, value, condition
  # survey:  regex matched against survey_name (unquoted with !! so a data
  #          column named "survey" cannot shadow the argument)
  # item:    if TRUE, one point range per item; otherwise per survey
  # palette: optional color vector for scale_color_manual(). The previous
  #          default (`palette = palette`) was self-referential and errored
  #          whenever the argument was missing; NULL now means "use ggplot's
  #          default colors".
  x_var = if (item) "item" else "survey_name"
  p = data %>%
    filter(grepl(!!(survey), survey_name)) %>%
    mutate(value = as.numeric(value)) %>%
    ggplot(aes(.data[[x_var]], value, color = condition)) +
    stat_summary(fun.data = "mean_cl_boot", geom = "pointrange",
                 position = position_dodge(width = .5)) +
    labs(x = "", y = "value\n") +
    theme_minimal() +
    theme(axis.text.x = element_text(angle = 45, hjust = 1),
          legend.position = "top")
  if (!is.null(palette)) {
    p = p + scale_color_manual(values = palette)
  }
  p
}

plot_compare = function(data, survey = ".*", palette, condition = FALSE) {
  # Compare mean ratings (bootstrapped 95% CIs) across individual messages,
  # faceted by survey, with a dotted line at each survey's grand mean.
  #
  # data:      long-form data frame with survey_name, message, value, condition
  # survey:    regex matched against survey_name
  # palette:   colors for the condition scale (used when condition = TRUE).
  #            The previous version ignored this argument and used the global
  #            `palette_cond` instead; fixed here.
  # condition: if TRUE, color point ranges by the condition column

  plot_data = data %>%
    filter(grepl(!!(survey), survey_name))

  # grand mean per survey, drawn as a dotted reference line in each facet
  rating_means = plot_data %>%
    group_by(survey_name) %>%
    summarize(mean = mean(value))

  p = plot_data %>%
    ggplot(aes(message, value)) +
    stat_summary(fun.data = "mean_cl_boot") +
    coord_flip() +
    geom_hline(data = rating_means, aes(yintercept = mean), linetype = "dotted") +
    facet_grid(~survey_name) +
    labs(x = "message\n", y = "\nvalue") +
    theme_minimal() +
    theme(legend.position = "top")

  if (condition) {
    # add the condition color aesthetic and the caller-supplied palette
    p = p + aes(color = condition) + scale_color_manual(values = palette)
  }
  p
}

# Message-level comparison data: drop familiarity ratings and pull the
# two-digit message number out of the item name.
data_comp = messages %>%
  filter(survey_name != "msg_familiarity") %>%
  extract(item, "message", "msg_.*_([0-9]{2})", remove = FALSE)

Visualizing person-level effects

A summary of condition effects on message ratings and other DVs/covariates at person level.

run models

standard

Models = lmer(DV ~ condition + (1 | SID) + (1 | message), data = data_mod)

  • Betas are standardized regression coefficients

self motivation

  • message-level
  Estimate Std. Error df t value Pr(>|t|)
(Intercept) -0.09288 0.189 28 -0.4913 0.627
conditionencouraging 0.2143 0.2871 28 0.7464 0.4616

social motivation

  • message-level
  Estimate Std. Error df t value Pr(>|t|)
(Intercept) 0.04142 0.191 27.5 0.2168 0.8299
conditionencouraging -0.09573 0.2899 27.99 -0.3303 0.7436

sharing

  • message-level
  Estimate Std. Error df t value Pr(>|t|)
(Intercept) -0.09035 0.1955 27.85 -0.4621 0.6476
conditionencouraging 0.2077 0.2952 27.95 0.7034 0.4876

social relevance

  • person-level
  Estimate Std. Error df t value Pr(>|t|)
(Intercept) -0.02791 0.1994 31.87 -0.14 0.8895
conditionencouraging 0.04483 0.2861 27.72 0.1567 0.8766

intentions

  • person-level
  Estimate Std. Error t value Pr(>|t|)
(Intercept) 0.2723 0.2739 0.9945 0.3285
conditionmessage control -0.4806 0.3638 -1.321 0.1972

norm (close)

  • person-level
  Estimate Std. Error t value Pr(>|t|)
(Intercept) 0.08179 0.2815 0.2906 0.7735
conditionmessage control -0.1443 0.374 -0.386 0.7024

norm (city/town)

  • person-level
  Estimate Std. Error t value Pr(>|t|)
(Intercept) 0.3322 0.2697 1.232 0.2282
conditionmessage control -0.5862 0.3582 -1.636 0.1129

beliefs

  • person-level
  Estimate Std. Error t value Pr(>|t|)
(Intercept) -0.1218 0.2806 -0.434 0.6676
conditionmessage control 0.2149 0.3728 0.5765 0.5689

moderation by need for cognition

Models = lmer(DV ~ condition x cognition + (1 | SID) + (1 | message), data = data_mod)

  • Betas are standardized regression coefficients

motivation self

  • message-level
Table continues below
  Estimate Std. Error df t value
(Intercept) -0.1091 0.209 26 -0.5221
conditionencouraging 0.3417 0.3259 26 1.049
cognition 0.04412 0.2194 26 0.2011
conditionencouraging:cognition 0.1865 0.3287 26 0.5674
  Pr(>|t|)
(Intercept) 0.606
conditionencouraging 0.304
cognition 0.8422
conditionencouraging:cognition 0.5753

motivation other

  • message-level
Table continues below
  Estimate Std. Error df t value
(Intercept) 0.0477 0.2132 25.77 0.2238
conditionencouraging -0.02268 0.3319 25.99 -0.06832
cognition -0.01703 0.2235 26 -0.07621
conditionencouraging:cognition 0.1817 0.3348 26 0.5428
  Pr(>|t|)
(Intercept) 0.8247
conditionencouraging 0.9461
cognition 0.9398
conditionencouraging:cognition 0.5919

sharing

  • message-level
Table continues below
  Estimate Std. Error df t value
(Intercept) -0.2164 0.1952 25.77 -1.108
conditionencouraging 0.557 0.3028 25.94 1.84
cognition 0.342 0.2039 25.96 1.678
conditionencouraging:cognition 0.1212 0.3055 25.96 0.3967
  Pr(>|t|)
(Intercept) 0.278
conditionencouraging 0.07726
cognition 0.1054
conditionencouraging:cognition 0.6949

social relevance

  • person-level
Table continues below
  Estimate Std. Error df t value
(Intercept) 0.005875 0.2196 29.67 0.02675
conditionencouraging 0.0937 0.3265 25.78 0.287
cognition -0.09244 0.2202 25.96 -0.4199
conditionencouraging:cognition 0.2639 0.3301 26.03 0.7995
  Pr(>|t|)
(Intercept) 0.9788
conditionencouraging 0.7764
cognition 0.678
conditionencouraging:cognition 0.4313

intentions

  • person-level
  Estimate Std. Error t value Pr(>|t|)
(Intercept) 0.2613 0.3221 0.8113 0.4246
conditionmessage control -0.4871 0.4199 -1.16 0.2566
cognition -0.02284 0.3153 -0.07243 0.9428
conditionmessage control:cognition 0.07023 0.4235 0.1659 0.8696

mediation via norms

mod1 = lm(norm_close ~ condition, data = data_mod_person)

mod2 = lm(DV ~ norm_close + condition, data = data_mod_person)

mediation_mod = mediate(mod1, mod2, sims = 1000, treat = "condition", mediator = "mediator")

  • Betas are standardized regression coefficients
  • person-level

motivation self

  • message-level
## 
## Causal Mediation Analysis 
## 
## Quasi-Bayesian Confidence Intervals
## 
##                Estimate 95% CI Lower 95% CI Upper p-value
## ACME            -0.0364      -0.3241         0.21    0.74
## ADE             -0.2237      -0.9475         0.49    0.56
## Total Effect    -0.2601      -0.9872         0.47    0.52
## Prop. Mediated   0.0641      -2.2194         3.17    0.71
## 
## Sample Size Used: 30 
## 
## 
## Simulations: 1000

motivation other

  • message-level
## 
## Causal Mediation Analysis 
## 
## Quasi-Bayesian Confidence Intervals
## 
##                Estimate 95% CI Lower 95% CI Upper p-value
## ACME           -0.01468     -0.21356         0.18    0.86
## ADE             0.13878     -0.58516         0.87    0.70
## Total Effect    0.12410     -0.63850         0.90    0.76
## Prop. Mediated  0.00781     -1.93115         1.79    0.93
## 
## Sample Size Used: 30 
## 
## 
## Simulations: 1000

sharing

  • message-level
## 
## Causal Mediation Analysis 
## 
## Quasi-Bayesian Confidence Intervals
## 
##                Estimate 95% CI Lower 95% CI Upper p-value
## ACME            -0.0409      -0.3330         0.18    0.75
## ADE             -0.2154      -0.9569         0.51    0.59
## Total Effect    -0.2564      -1.0044         0.54    0.53
## Prop. Mediated   0.0633      -2.3657         1.95    0.75
## 
## Sample Size Used: 30 
## 
## 
## Simulations: 1000

Relevance social

  • person-level
## 
## Causal Mediation Analysis 
## 
## Quasi-Bayesian Confidence Intervals
## 
##                Estimate 95% CI Lower 95% CI Upper p-value
## ACME            -0.0396      -0.3384         0.24    0.75
## ADE              0.0146      -0.6738         0.70    0.98
## Total Effect    -0.0249      -0.7650         0.70    0.92
## Prop. Mediated   0.0612      -3.0876         3.82    0.78
## 
## Sample Size Used: 30 
## 
## 
## Simulations: 1000

intentions

  • person-level
## 
## Causal Mediation Analysis 
## 
## Quasi-Bayesian Confidence Intervals
## 
##                Estimate 95% CI Lower 95% CI Upper p-value
## ACME            -0.0544      -0.4079         0.25    0.74
## ADE             -0.4078      -1.0948         0.25    0.24
## Total Effect    -0.4622      -1.2241         0.27    0.23
## Prop. Mediated   0.1028      -1.4979         2.50    0.64
## 
## Sample Size Used: 30 
## 
## 
## Simulations: 1000

Pilot 2

Here, we test the effect of “encouraging” and “mocking” humor versus message control, which were non-humorous. Mocking humor refers to humorous framing of health messages such that these messages were ridiculing individuals who would choose to not follow COVID-19 related preventative measures (like social distancing), even though their circumstances allow them to follow those measures. In contrast, encouraging messages used humorous framings which did not overtly mock or ridicule any person or group of people. In this study, we used the following sets of stimuli:

  • mocking-paired, encouraging-paired: 6 messages in each condition, such that each message in mocking condition shares the first sentence (or message control) with one message in the encouraging condition
  • mocking-unpaired: 6 messages which are not paired with encouraging condition
  • encouraging-unpaired: 5 messages which are not paired with mocking condition

In this analysis, we combine mocking-paired and mocking-unpaired into “mocking” condition, and similarly, we combine encouraging-paired and encouraging-unpaired into “encouraging” condition.

## tidy data for analysis

data = data_pilot2

# Message-level ratings: collapse the paired/unpaired variants into the three
# main conditions and compute per-message favorability (positive - negative).
messages = data %>%
  filter(condition %in% c("message control", "encouraging-unpaired", "encouraging-paired", 
                          "mocking-unpaired", "mocking-paired")) %>%
  mutate(condition = str_replace(condition, "-paired|-unpaired", "")) %>% # merge paired/unpaired variants
  filter(grepl("msg", survey_name)) %>%
  mutate(value = as.numeric(value),
         # reverse-code need-for-cognition items (1-5 scale -> abs(6 - x));
         # NOTE(review): cognition items are unlikely to survive the "msg"
         # filter above, so these three lines may be inert here — confirm
         value = ifelse(item == "cognition_2", abs(6 - value), value),
         value = ifelse(item == "cognition_4", abs(6 - value), value),
         value = ifelse(item == "cognition_6", abs(6 - value), value)) %>% 
  extract(item, "item", "msg_.*_(.*)") %>% # keep just the message-number suffix of the item name
  spread(survey_name, value) %>% # one column per rating type
  mutate(msg_favorability = msg_positive - msg_negative) %>%
  dplyr::select(-msg_negative, -msg_positive) %>%
  gather(survey_name, value, contains("msg")) %>% # back to long form
  mutate(item = sprintf("%s_%s", survey_name, item))

# Person-level survey items combined with the message ratings above.
data_tidy = data %>%
  filter(condition %in% c("message control", "encouraging-unpaired", "encouraging-paired", 
                          "mocking-unpaired", "mocking-paired")) %>%
  mutate(condition = str_replace(condition, "-paired|-unpaired", "")) %>%
  filter(grepl("cognition|intentions|norms_close|norms_town|beliefs_safe|beliefs_norms|politics_party|politics_conserv|^age$|gender", survey_name)) %>%
  mutate(value = as.numeric(value)) %>% 
  bind_rows(messages)

# Demographic / control covariates, one row per participant.
control_vars = data %>%
  filter(grepl("state|gender|^age$", survey_name)) %>%
  dplyr::select(condition, SID, survey_name, value) %>%
  unique() %>%
  spread(survey_name, value) %>%
  mutate(state = as.factor(state),
         gender = recode(gender, "1" = "male", "2" = "female", "3" = "other", "4" = "prefer not to say"),
         age = scale(as.numeric(age), center = TRUE, scale = FALSE)) # mean-center age, keep original scale

# Person-level means per survey (selected items only).
data_person = data_tidy %>%
  filter(grepl("msg|cognition|beliefs|intentions1_2|intentions1_4|intentions1_6|intentions1_10|norms_close1_2|norms_close1_4|norms_close1_6|norms_close1_10|norms_town1_2|norms_town1_4|norms_town1_6|norms_town1_10|politics_party|politics_conserv", item)) %>%
  group_by(condition, SID, survey_name) %>%
  summarize(value = mean(value, na.rm = TRUE)) # result remains grouped by condition and SID

number of subjects per condition

plot_cond = function(data, survey, item=TRUE, palette=NULL) {
  # Plot bootstrapped means with 95% CIs by condition for all surveys whose
  # name matches the regex `survey`.
  #
  # data:    long-form data frame with survey_name, item, value, condition
  # survey:  regex matched against survey_name (unquoted with !! so a data
  #          column named "survey" cannot shadow the argument)
  # item:    if TRUE, one point range per item; otherwise per survey
  # palette: optional color vector for scale_color_manual(). The previous
  #          default (`palette = palette`) was self-referential and errored
  #          whenever the argument was missing; NULL now means "use ggplot's
  #          default colors".
  x_var = if (item) "item" else "survey_name"
  p = data %>%
    filter(grepl(!!(survey), survey_name)) %>%
    mutate(value = as.numeric(value)) %>%
    ggplot(aes(.data[[x_var]], value, color = condition)) +
    stat_summary(fun.data = "mean_cl_boot", geom = "pointrange",
                 position = position_dodge(width = .5)) +
    labs(x = "", y = "value\n") +
    theme_minimal() +
    theme(axis.text.x = element_text(angle = 45, hjust = 1),
          legend.position = "top")
  if (!is.null(palette)) {
    p = p + scale_color_manual(values = palette)
  }
  p
}

plot_compare = function(data, survey = ".*", palette, condition = FALSE) {
  # Compare mean ratings (bootstrapped 95% CIs) across individual messages,
  # faceted by survey, with a dotted line at each survey's grand mean.
  #
  # data:      long-form data frame with survey_name, message, value, condition
  # survey:    regex matched against survey_name
  # palette:   colors for the condition scale (used when condition = TRUE).
  #            The previous version ignored this argument and used the global
  #            `palette_cond` instead; fixed here.
  # condition: if TRUE, color point ranges by the condition column

  plot_data = data %>%
    filter(grepl(!!(survey), survey_name))

  # grand mean per survey, drawn as a dotted reference line in each facet
  rating_means = plot_data %>%
    group_by(survey_name) %>%
    summarize(mean = mean(value))

  p = plot_data %>%
    ggplot(aes(message, value)) +
    stat_summary(fun.data = "mean_cl_boot") +
    coord_flip() +
    geom_hline(data = rating_means, aes(yintercept = mean), linetype = "dotted") +
    facet_grid(~survey_name) +
    labs(x = "message\n", y = "\nvalue") +
    theme_minimal() +
    theme(legend.position = "top")

  if (condition) {
    # add the condition color aesthetic and the caller-supplied palette
    p = p + aes(color = condition) + scale_color_manual(values = palette)
  }
  p
}

# Message-level comparison data: drop familiarity ratings and pull the
# two-digit message number out of the item name.
data_comp = messages %>%
  filter(survey_name != "msg_familiarity") %>%
  extract(item, "message", "msg_.*_([0-9]{2})", remove = FALSE)

Visualizing person-level effects

A summary of condition effects on message ratings and other DVs/covariates at person level.

other DVs and covariates

item level

A summary of condition effects on each survey item.

survey level

A summary of condition effects on each survey, aggregated across survey items.

run models

standard

Models = lmer(DV ~ condition + (1 | SID) + (1 | message), data = data_mod)

  • Betas are standardized regression coefficients

self motivation

  • message-level
  Estimate Std. Error df t value Pr(>|t|)
(Intercept) 0.1759 0.1372 70.03 1.282 0.2041
conditionencouraging -0.1304 0.2168 70.23 -0.6016 0.5494
conditionmocking -0.4119 0.2057 70.07 -2.002 0.04914

social motivation

  • message-level
  Estimate Std. Error df t value Pr(>|t|)
(Intercept) 0.1703 0.1407 70.3 1.21 0.2302
conditionencouraging -0.1351 0.2214 71.93 -0.6099 0.5438
conditionmocking -0.3831 0.2104 71.8 -1.821 0.07274

sharing

  • message-level
  Estimate Std. Error df t value Pr(>|t|)
(Intercept) 0.009405 0.1648 71.93 0.05708 0.9546
conditionencouraging -0.236 0.2601 71.87 -0.9072 0.3673
conditionmocking 0.1429 0.2469 71.93 0.579 0.5644

Relevance social

  • person-level
  Estimate Std. Error df t value Pr(>|t|)
(Intercept) 0.1253 0.1337 70.31 0.9366 0.3521
conditionencouraging 0.08531 0.2109 71.18 0.4045 0.6871
conditionmocking -0.4758 0.2003 70.97 -2.376 0.02021

intentions

  • person-level
  Estimate Std. Error t value Pr(>|t|)
(Intercept) -0.07621 0.2258 -0.3376 0.7367
conditionmessage control 0.02089 0.2914 0.07167 0.9431
conditionmocking 0.2089 0.3057 0.6833 0.4966

norm (close)

  • person-level
  Estimate Std. Error t value Pr(>|t|)
(Intercept) -0.4682 0.2143 -2.185 0.03221
conditionmessage control 0.4793 0.2767 1.732 0.08755
conditionmocking 0.8445 0.2902 2.91 0.004819

norm (city/town)

  • person-level
  Estimate Std. Error t value Pr(>|t|)
(Intercept) -0.4389 0.2135 -2.056 0.04348
conditionmessage control 0.3906 0.2756 1.417 0.1608
conditionmocking 0.865 0.2891 2.993 0.003803

moderation by need for cognition

Models = lmer(DV ~ condition x cognition + (1 | SID) + (1 | message), data = data_mod)

  • Betas are standardized regression coefficients

motivation self

  • message-level
Table continues below
  Estimate Std. Error df t value
(Intercept) 0.1762 0.1397 67.17 1.261
conditionencouraging -0.1303 0.2208 67.33 -0.5901
conditionmocking -0.4115 0.2094 67.19 -1.965
cognition -0.04103 0.129 67.99 -0.318
conditionencouraging:cognition 0.05924 0.282 67.9 0.21
conditionmocking:cognition -0.04964 0.1926 67.93 -0.2578
  Pr(>|t|)
(Intercept) 0.2116
conditionencouraging 0.5571
conditionmocking 0.05354
cognition 0.7515
conditionencouraging:cognition 0.8343
conditionmocking:cognition 0.7973

motivation other

  • message-level
Table continues below
  Estimate Std. Error df t value
(Intercept) 0.1701 0.1432 67.68 1.187
conditionencouraging -0.1317 0.2255 69.01 -0.5839
conditionmocking -0.3833 0.2141 68.92 -1.79
cognition 0.0277 0.1303 68.01 0.2125
conditionencouraging:cognition 0.121 0.2847 67.79 0.425
conditionmocking:cognition 0.008489 0.1944 67.86 0.04367
  Pr(>|t|)
(Intercept) 0.2392
conditionencouraging 0.5612
conditionmocking 0.07788
cognition 0.8323
conditionencouraging:cognition 0.6722
conditionmocking:cognition 0.9653

sharing

  • message-level
Table continues below
  Estimate Std. Error df t value
(Intercept) 0.007055 0.1629 68.89 0.04331
conditionencouraging -0.2263 0.2572 68.83 -0.8797
conditionmocking 0.1448 0.2441 68.89 0.5931
cognition 0.2667 0.1497 68.05 1.781
conditionencouraging:cognition 0.08984 0.3273 67.96 0.2745
conditionmocking:cognition -0.2659 0.2234 67.99 -1.19
  Pr(>|t|)
(Intercept) 0.9656
conditionencouraging 0.3821
conditionmocking 0.555
cognition 0.07931
conditionencouraging:cognition 0.7845
conditionmocking:cognition 0.2381

Relevance social

  • person-level
Table continues below
  Estimate Std. Error df t value
(Intercept) 0.1267 0.1305 67.26 0.9704
conditionencouraging 0.09425 0.2059 68.12 0.4577
conditionmocking -0.4773 0.1955 67.91 -2.442
cognition -0.1851 0.1195 68.15 -1.549
conditionencouraging:cognition 0.6281 0.2611 67.95 2.406
conditionmocking:cognition 0.2814 0.1783 68.01 1.579
  Pr(>|t|)
(Intercept) 0.3353
conditionencouraging 0.6486
conditionmocking 0.01721
cognition 0.1261
conditionencouraging:cognition 0.01885
conditionmocking:cognition 0.119

intentions

  • person-level
  Estimate Std. Error t value Pr(>|t|)
(Intercept) -0.07243 0.2295 -0.3155 0.7533
conditionmessage control 0.01704 0.2963 0.05753 0.9543
conditionmocking 0.206 0.3107 0.6629 0.5096
cognition 0.1704 0.3367 0.5061 0.6144
conditionmessage control:cognition -0.1636 0.3786 -0.4322 0.667
conditionmocking:cognition -0.3041 0.3875 -0.7848 0.4353

mediation via norms

mod1 = lm(norm_close ~ condition, data = data_mod_person)

mod2 = lm(DV ~ norm_close + condition, data = data_mod_person)

mediation_mod = mediate(mod1, mod2, sims = 1000, treat = "condition", mediator = "mediator")

  • Betas are standardized regression coefficients
  • person-level

motivation self

  • message-level
## 
## Causal Mediation Analysis 
## 
## Quasi-Bayesian Confidence Intervals
## 
##                Estimate 95% CI Lower 95% CI Upper p-value
## ACME             0.0552      -0.0772         0.23    0.39
## ADE              0.1085      -0.4601         0.69    0.73
## Total Effect     0.1637      -0.3880         0.72    0.56
## Prop. Mediated   0.0693      -3.5615         2.77    0.75
## 
## Sample Size Used: 74 
## 
## 
## Simulations: 1000

motivation other

  • message-level
## 
## Causal Mediation Analysis 
## 
## Quasi-Bayesian Confidence Intervals
## 
##                Estimate 95% CI Lower 95% CI Upper p-value
## ACME             0.0472      -0.0793         0.23    0.51
## ADE              0.1263      -0.4847         0.65    0.63
## Total Effect     0.1735      -0.3864         0.70    0.52
## Prop. Mediated   0.0598      -2.5178         3.11    0.75
## 
## Sample Size Used: 74 
## 
## 
## Simulations: 1000

sharing

  • message-level
## 
## Causal Mediation Analysis 
## 
## Quasi-Bayesian Confidence Intervals
## 
##                Estimate 95% CI Lower 95% CI Upper p-value
## ACME             0.0330      -0.0881         0.20    0.63
## ADE              0.2274      -0.3351         0.80    0.46
## Total Effect     0.2604      -0.2947         0.81    0.37
## Prop. Mediated   0.0575      -1.7750         2.51    0.72
## 
## Sample Size Used: 74 
## 
## 
## Simulations: 1000

Relevance social

  • person-level
## 
## Causal Mediation Analysis 
## 
## Quasi-Bayesian Confidence Intervals
## 
##                Estimate 95% CI Lower 95% CI Upper p-value
## ACME             0.0833      -0.0311         0.29    0.23
## ADE             -0.2032      -0.8017         0.38    0.51
## Total Effect    -0.1199      -0.6970         0.47    0.68
## Prop. Mediated  -0.0845      -4.1738         2.78    0.77
## 
## Sample Size Used: 74 
## 
## 
## Simulations: 1000

intentions

  • person-level
## 
## Causal Mediation Analysis 
## 
## Quasi-Bayesian Confidence Intervals
## 
##                 Estimate 95% CI Lower 95% CI Upper p-value  
## ACME             0.26654     -0.02855         0.60    0.07 .
## ADE             -0.26048     -0.75091         0.21    0.30  
## Total Effect     0.00606     -0.57329         0.56    0.99  
## Prop. Mediated   0.23208    -19.96642        14.98    0.95  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Sample Size Used: 74 
## 
## 
## Simulations: 1000